#include <asm/uaccess.h>
#include <asm/tlb.h>
+#include <linux/list.h>
+
/* USER DEFINES -- THESE SHOULD BE COPIED TO USER-SPACE TOOLS */
#define USER_INFLATE_BALLOON 1 /* return mem to hypervisor */
#define USER_DEFLATE_BALLOON 2 /* claim mem from hypervisor */
unsigned long credit;
static unsigned long current_pages, most_seen_pages;
-/*
- * Dead entry written into balloon-owned entries in the PMT.
- * It is deliberately different to INVALID_P2M_ENTRY.
- */
-#define DEAD 0xdead1234
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+
+/* Head of the list of ballooned pages */
+struct page *ball_pg_hd = NULL;
+
+/*
+ * Record @pfn as ballooned-out: push its struct page onto a
+ * singly-linked LIFO stack threaded through page->private
+ * (2.6 kernels have no page->list member, so the chain is kept
+ * by hand; compare the list.h-based variant in the #else branch).
+ * NOTE(review): assumes pfn is a valid index into mem_map --
+ * callers must guarantee this; no bounds check is done here.
+ */
+void add_ballooned_page(unsigned long pfn)
+{
+ struct page *p = mem_map + pfn;
+
+ p->private = (unsigned long)ball_pg_hd;
+ ball_pg_hd = p;
+}
+
+/*
+ * Pop the most recently ballooned page off the page->private
+ * stack (LIFO order). Returns the struct page, or NULL when no
+ * ballooned pages remain.
+ */
+struct page *rem_ballooned_page(void)
+{
+ if ( ball_pg_hd != NULL )
+ {
+ struct page *ret = ball_pg_hd;
+ ball_pg_hd = (struct page *)ball_pg_hd->private;
+ return ret;
+ }
+ else
+ return NULL;
+}
+
+#else
+/* List of ballooned pages, threaded through the mem_map array. */
+LIST_HEAD(ballooned_pages);
+
+/*
+ * Record @pfn as ballooned-out on pre-2.6 kernels: link its
+ * struct page onto the ballooned_pages list head via the
+ * page->list member (present in older struct page layouts).
+ * NOTE(review): assumes pfn is a valid index into mem_map --
+ * callers must guarantee this; no bounds check is done here.
+ */
+void add_ballooned_page(unsigned long pfn)
+{
+ struct page *p = mem_map + pfn;
+
+ list_add(&p->list, &ballooned_pages);
+}
+
+/*
+ * Pop one ballooned page -- most recently added first, since
+ * list_add() inserts at the list head. Returns the struct page,
+ * or NULL when the list is empty.
+ */
+struct page *rem_ballooned_page(void)
+{
+ if(!list_empty(&ballooned_pages))
+ {
+ struct list_head *next;
+ struct page *ret;
+
+ next = ballooned_pages.next;
+ ret = list_entry(next, struct page, list);
+ list_del(next);
+
+ return ret;
+ }
+ else
+ return NULL;
+}
+
+#endif
static inline pte_t *get_ptep(unsigned long addr)
{
/* Main function for relinquishing memory. */
static unsigned long inflate_balloon(unsigned long num_pages)
+
{
unsigned long *parray;
unsigned long *currp;
currp = parray;
for ( j = 0; j < i; j++, currp++ )
__free_page((struct page *) (mem_map + *currp));
+
ret = -EFAULT;
goto cleanup;
}
kunmap(&mem_map[*currp]);
}
#endif
- phys_to_machine_mapping[*currp] = DEAD;
+
+ add_ballooned_page(*currp);
+
+ phys_to_machine_mapping[*currp] = INVALID_P2M_ENTRY;
*currp = mfn;
}
* incorporated here.
*/
- unsigned long tot_pages = most_seen_pages;
unsigned long * curr = parray;
unsigned long num_installed;
- unsigned long i;
+
+ struct page *page;
num_installed = 0;
- for ( i = 0; (i < tot_pages) && (num_installed < num); i++ )
+ while ( (page = rem_ballooned_page()) != NULL )
{
- if ( phys_to_machine_mapping[i] == DEAD )
- {
- phys_to_machine_mapping[i] = *curr;
- queue_machphys_update(*curr, i);
- if (i<max_low_pfn)
- queue_l1_entry_update(
- get_ptep((unsigned long)__va(i << PAGE_SHIFT)),
- ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+ unsigned long pfn;
+
+ if ( num_installed == num )
+ break;
- __free_page(mem_map + i);
+ pfn = page - mem_map;
- curr++;
- num_installed++;
+ if(phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY)
+ {
+ printk("BUG: Tried to unballoon existing page!");
+ BUG();
}
+
+ phys_to_machine_mapping[pfn] = *curr;
+ queue_machphys_update(*curr, pfn);
+ if (pfn<max_low_pfn)
+ queue_l1_entry_update(
+ get_ptep((unsigned long)__va(pfn << PAGE_SHIFT)),
+ ((*curr) << PAGE_SHIFT) | pgprot_val(PAGE_KERNEL));
+
+ __free_page(mem_map + pfn);
+
+ curr++;
+ num_installed++;
}
return num_installed;